return 0;
}
+static int svm_cpu_prepare(unsigned int cpu)
+{
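+ /* Allocate this CPU's host save area and root VMCB, if not already allocated. */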
+ if ( ((hsa[cpu] == NULL) &&
+ ((hsa[cpu] = alloc_host_save_area()) == NULL)) ||
+ ((root_vmcb[cpu] == NULL) &&
+ ((root_vmcb[cpu] = alloc_vmcb()) == NULL)) )
+ return -ENOMEM;
+ return 0;
+}
+
static int svm_cpu_up(struct cpuinfo_x86 *c)
{
u32 eax, edx, phys_hsa_lo, phys_hsa_hi;
return 0;
}
- if ( ((hsa[cpu] == NULL) &&
- ((hsa[cpu] = alloc_host_save_area()) == NULL)) ||
- ((root_vmcb[cpu] == NULL) &&
- ((root_vmcb[cpu] = alloc_vmcb()) == NULL)) )
+ if ( svm_cpu_prepare(cpu) != 0 )
return 0;
write_efer(read_efer() | EFER_SVME);
static struct hvm_function_table __read_mostly svm_function_table = {
.name = "SVM",
+ .cpu_prepare = svm_cpu_prepare,
.cpu_down = svm_cpu_down,
.domain_initialise = svm_domain_initialise,
.domain_destroy = svm_domain_destroy,
local_irq_restore(flags);
}
+int vmx_cpu_prepare(unsigned int cpu)
+{
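+ /* Allocate this CPU's host VMCS, if it has not been allocated already. */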
+ if ( per_cpu(host_vmcs, cpu) != NULL )
+ return 0;
+
+ per_cpu(host_vmcs, cpu) = vmx_alloc_vmcs();
+ if ( per_cpu(host_vmcs, cpu) != NULL )
+ return 0;
+
+ printk("CPU%d: Could not allocate host VMCS\n", cpu);
+ return -ENOMEM;
+}
+
int vmx_cpu_up(void)
{
u32 eax, edx;
INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
- if ( this_cpu(host_vmcs) == NULL )
- {
- this_cpu(host_vmcs) = vmx_alloc_vmcs();
- if ( this_cpu(host_vmcs) == NULL )
- {
- printk("CPU%d: Could not allocate host VMCS\n", cpu);
- return 0;
- }
- }
+ if ( vmx_cpu_prepare(cpu) != 0 )
+ return 0;
switch ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
{
static struct hvm_function_table __read_mostly vmx_function_table = {
.name = "VMX",
+ .cpu_prepare = vmx_cpu_prepare,
.domain_initialise = vmx_domain_initialise,
.domain_destroy = vmx_domain_destroy,
.vcpu_initialise = vmx_vcpu_initialise,
int __devinit __cpu_up(unsigned int cpu)
{
- int ret = 0;
+ int ret;
+
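+ /* Give HVM a chance to allocate its per-CPU state before the CPU is brought up. */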
+ ret = hvm_cpu_prepare(cpu);
+ if ( ret )
+ return ret;
/*
* We do warm boot only on cpus that had booted earlier
int (*event_pending)(struct vcpu *v);
int (*do_pmu_interrupt)(struct cpu_user_regs *regs);
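+ /* Allocate per-CPU resources needed before @cpu can be brought up. */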
+ int (*cpu_prepare)(unsigned int cpu);
int (*cpu_up)(void);
void (*cpu_down)(void);
void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);
int hvm_gtsc_need_scale(struct domain *d);
+static inline int
+hvm_cpu_prepare(unsigned int cpu)
+{
+ return (hvm_funcs.cpu_prepare ? hvm_funcs.cpu_prepare(cpu) : 0);
+}
+
static inline int hvm_cpu_up(void)
{
- if ( hvm_funcs.cpu_up )
- return hvm_funcs.cpu_up();
- return 1;
+ return (hvm_funcs.cpu_up ? hvm_funcs.cpu_up() : 1);
}
static inline void hvm_cpu_down(void)
extern void start_vmx(void);
extern void vmcs_dump_vcpu(struct vcpu *v);
extern void setup_vmcs_dump(void);
+extern int vmx_cpu_prepare(unsigned int cpu);
extern int vmx_cpu_up(void);
extern void vmx_cpu_down(void);